// DeepTIAS / Tongue extraction (crop-resize method) / Form1.cs
// Upstream note (2020-10-15): "色抽出実装した" — color extraction implemented.
using System;
using System.Windows.Forms;
using System.IO;
using System.Drawing;
using System.Drawing.Imaging;
using OpenCvSharp;
using System.ComponentModel;
using System.Runtime.InteropServices;
using TensorFlow;
using System.Threading;
using System.Diagnostics;

namespace Tongue_extraction
{
    public partial class Form1 : Form
    {
        //Use the RemoveSmallRegionDLL
        // Native helper: reads the binary mask image file `name`, removes connected
        // regions smaller than AreaLimit pixels, and writes the result to `name2`.
        // CheckMode/NeihborMode select fill direction and connectivity
        // — semantics live in the DLL; TODO confirm against its source.
        [DllImport(@"RemoveSmallRegionDLL.dll", EntryPoint = "RemoveSmallRegion", SetLastError = true, CharSet = CharSet.Ansi, ExactSpelling = false, CallingConvention = CallingConvention.StdCall)]
        extern static void RemoveSmallRegion(string name, string name2, int AreaLimit, int CheckMode, int NeihborMode);

        // Working images. Inputs are assumed 1024x1280 BGR; the segmentation
        // network operates on 256x256 crops (see ImageUtil/ImageUtil2 below).
        Mat mat_drawBox = new Mat(1024, 1280, MatType.CV_8UC3, 1);  // input with detection box drawn on it
        Mat mat_roi = new Mat(1024, 1280, MatType.CV_8UC3, 1);      // detected ROI resized to 256x256 for segmentation
        Mat mat_input = new Mat(1024, 1280, MatType.CV_8UC3, 1);    // current input image as loaded from disk
        Mat mat_roi256 = new Mat(256, 256, MatType.CV_8UC3, 1);     // 256x256 size template used for resizing
        Mat mat_roisize = new Mat(1024, 1280, MatType.CV_8UC3, 1);  // ROI crop at original resolution
        Mat mat_output = new Mat(1024, 1280, MatType.CV_8UC1, 1);         // raw segmentation output (detection path)
        Mat mat_outputNoBox = new Mat(1024, 1280, MatType.CV_8UC1, 1);    // raw segmentation output (no-detection fallback path)
        Mat mat_outputChanged = new Mat(1024, 1280, MatType.CV_8UC1, 1);  // output after small-region removal
        Mat mat_mask = new Mat(1024, 1280, MatType.CV_8UC1, 1);           // mask resized back to input resolution
        Mat mat_extraction = new Mat(1024, 1280, MatType.CV_8UC3, 1);     // input with non-tongue pixels painted white
        Mat mat_cropped;                                                  // input with only the detection box region copied in
        Mat mat_outputSRGNoBox = new Mat(1024, 1280, MatType.CV_8UC1, 1); // 256x256 mask after small-region removal (fallback path)
        Mat mat_outputSRG = new Mat(1024, 1280, MatType.CV_8UC1, 1);      // 256x256 mask after small-region removal (detection path)
        Mat mat_maskSRG = new Mat(1024, 1280, MatType.CV_8UC1, 1);        // final binary mask at input resolution
        Mat mat_gloss = new Mat(1024, 1280, MatType.CV_8UC1, 1);          // input with gloss pixels highlighted

        // Shared scratch bitmap reused for every picture-box update.
        // NOTE(review): consider renaming — the current name is unprofessional
        // and the field is public static while only used by this form.
        public static Bitmap bitmap_bitch;
        string[] path;  // files found under the "data" input folder

        OpenCvSharp.Rect rectangle;  // detection bounding box in input-image coordinates

        byte[] byte_inputDetection;     // JPEG bytes fed to the detection model
        byte[] byte_inputSegmentation;  // JPEG bytes fed to the segmentation model
        OpenCvSharp.Point P1 = new OpenCvSharp.Point();  // bounding box top-left
        OpenCvSharp.Point P2 = new OpenCvSharp.Point();  // bounding box bottom-right

        // Flattened 256x256 binary mask (0/255) built from the network output.
        // 200000 > 256*256 = 65536, so there is ample headroom.
        byte[] mask = new byte[200000];

        string basepath;   // current working directory
        string imageFile;  // file name of the image currently being processed (leading separator kept)
        string time;       // timestamp string for the failure log
        string modelFile;  // path of the segmentation model in use
        int ii = 0;        // write index into `mask`
        int count;         // 1-based index of the current image in the batch
        int areaCount;     // number of tongue pixels in the final mask

        Rect roi = new Rect();  // crop rectangle built from P1 and the detection size
        int mmp = 0;
        int pmm = 0;

        int check_detection = 0;  // 1 when at least one detection scored > 0.5
        float max_score = 0;      // best detection score seen for the current image

        // Signalled = running; Reset() pauses the worker loop between images.
        private ManualResetEvent manualReset = new ManualResetEvent(true);

        // CSV log path, one file per application run.
        // NOTE(review): "hh" is the 12-hour clock without AM/PM, so runs 12 hours
        // apart can collide on the same file name — "HH" was probably intended.
        string fileName_info = Directory.GetCurrentDirectory() + "//info//" + DateTime.Now.ToLocalTime().ToString("yyyyMMddhhmmss") + ".csv";
        StreamWriter sw;  // open for the whole app lifetime; NOTE(review): never disposed — confirm flushing on exit

        /// <summary>
        /// Initializes the form and opens the per-run CSV log that records the
        /// bounding box and measurement info for every processed image.
        /// </summary>
        public Form1()
        {
            InitializeComponent();

            // Ensure the "info" output folder exists before opening the writer;
            // StreamWriter throws DirectoryNotFoundException on a fresh install
            // where the folder has not been created yet.
            Directory.CreateDirectory(Path.GetDirectoryName(fileName_info));

            // boundingboxなどのinfo出力用 (CSV header row)
            sw = new StreamWriter(fileName_info, false, System.Text.Encoding.GetEncoding("shift_jis"));
            sw.WriteLine(
                "image" + "," +
                "top left X" + "," + "top left Y" + "," + "bottom right X" + "," + "bottom right Y" + "," +
                "Width" + "," + "Height" + "," + "Area" + "," + "Gloss Count");
        }

        // Makes the debug text box visible. [Conditional("DEBUG")] removes every
        // call site of this method in release builds, so the box only ever
        // appears when running a DEBUG build.
        [Conditional("DEBUG")]
        private void ShowDebugBox()
        {
            textBox1.Visible = true;
        }

        /// <summary>
        /// Starts batch processing: collects every file under the "data" folder,
        /// resets all preview panes from any previous run, and launches the
        /// background worker.
        /// </summary>
        private void Button_start_Click(object sender, EventArgs e)
        {
            ShowDebugBox();

            // Guard against a missing input folder — Directory.GetFiles would
            // otherwise throw DirectoryNotFoundException on the UI thread.
            if (!Directory.Exists("data"))
            {
                MessageBox.Show("Error: input folder [data] was not found.",
                    "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                return;
            }
            path = Directory.GetFiles("data");

            button_start.Enabled = false;
            button_pause.Enabled = true;

            // Clear every preview pane (previously eight copy-pasted pairs).
            PictureBox[] panes =
            {
                pictureBox_input, pictureBox_detection, pictureBox_cropResized,
                pictureBox_output, pictureBox_outputSRG, pictureBox_maskSRG,
                pictureBox_extraction, pictureBox_last,
            };
            foreach (PictureBox pane in panes)
            {
                pane.Image = null;
                pane.Refresh();
            }

            backgroundWorker1.RunWorkerAsync();
        }

        /// <summary>
        /// Builds the input tensor for the tongue *detection* model: decodes a
        /// JPEG byte array, resizes to 256x256 and adds a batch dimension
        /// ([1, H, W, 3]). Pixel values pass through unscaled (Mean=0, Scale=1).
        /// </summary>
        public static class ImageUtil
        {
            /// <summary>
            /// Converts JPEG bytes into a normalized tensor suitable as model input.
            /// The caller owns (and should dispose) the returned tensor.
            /// </summary>
            public static TFTensor CreateTensorFromImageFile(byte[] contents, TFDataType destinationDataType = TFDataType.UInt8)
            {
                TFGraph graph;
                TFOutput input, output;

                // Construct a graph to normalize the image.
                ConstructGraphToNormalizeImage(out graph, out input, out output, destinationDataType);

                // DecodeJpeg takes a scalar String-valued tensor as input.
                // `using` disposes the graph, input tensor and session
                // deterministically; the previous manual Dispose calls inside the
                // using block were redundant double-disposes, and the explicit
                // GC.Collect()/WaitForPendingFinalizers() calls are an anti-pattern
                // once disposal is deterministic, so both have been removed.
                using (graph)
                using (var tensor = TFTensor.CreateString(contents))
                using (var session = new TFSession(graph))
                {
                    var normalized = session.Run(
                             inputs: new[] { input },
                             inputValues: new[] { tensor },
                             outputs: new[] { output });

                    return normalized[0];
                }
            }

            /// <summary>
            /// Constructs a TensorFlow graph mapping a JPEG-encoded string to a
            /// [1, 256, 256, 3] tensor:
            ///   cast((resize_bilinear(decode_jpeg(input)) - Mean) / Scale, destinationDataType)
            /// With Mean=0 and Scale=1 this is effectively just decode+resize+cast.
            /// </summary>
            public static void ConstructGraphToNormalizeImage(out TFGraph graph, out TFOutput input, out TFOutput output, TFDataType destinationDataType = TFDataType.UInt8)
            {
                const int W = 256;
                const int H = 256;
                const float Mean = 0;
                const float Scale = 1;
                graph = new TFGraph();
                input = graph.Placeholder(TFDataType.String);
                output = graph.Cast(graph.Div(
                    x: graph.Sub(
                        x: graph.ResizeBilinear(
                            images: graph.ExpandDims(
                                input: graph.Cast(
                                    graph.DecodeJpeg(contents: input, channels: 3), DstT: destinationDataType),
                                dim: graph.Const(0, "make_batch")),
                            size: graph.Const(new int[] { W, H }, "size")),
                        y: graph.Const(Mean, "mean")),
                    y: graph.Const(Scale, "scale")), destinationDataType);
            }
        }

        /// <summary>
        /// Builds the input tensor for the *segmentation* (pix2pix) models.
        /// Identical pipeline to <see cref="ImageUtil"/> except the pixels are
        /// normalized to roughly [-1, 1] (Mean=128, Scale=128) as floats.
        /// </summary>
        public static class ImageUtil2
        {
            /// <summary>
            /// Converts JPEG bytes into a normalized float tensor suitable as
            /// model input. The caller owns (and should dispose) the returned tensor.
            /// </summary>
            public static TFTensor CreateTensorFromImageFile(byte[] contents, TFDataType destinationDataType = TFDataType.Float)
            {
                TFGraph graph;
                TFOutput input, output;

                // Construct a graph to normalize the image.
                ConstructGraphToNormalizeImage(out graph, out input, out output, destinationDataType);

                // DecodeJpeg takes a scalar String-valued tensor as input.
                // `using` disposes the graph, input tensor and session
                // deterministically; the previous manual Dispose calls inside the
                // using block were redundant double-disposes, and the explicit
                // GC.Collect()/WaitForPendingFinalizers() calls are an anti-pattern
                // once disposal is deterministic, so both have been removed.
                using (graph)
                using (var tensor = TFTensor.CreateString(contents))
                using (var session = new TFSession(graph))
                {
                    var normalized = session.Run(
                             inputs: new[] { input },
                             inputValues: new[] { tensor },
                             outputs: new[] { output });

                    return normalized[0];
                }
            }

            /// <summary>
            /// Constructs a TensorFlow graph mapping a JPEG-encoded string to a
            /// [1, 256, 256, 3] tensor:
            ///   cast((resize_bilinear(decode_jpeg(input)) - 128) / 128, destinationDataType)
            /// The decode result is cast to Float (not destinationDataType) before
            /// the arithmetic so the subtraction/division happen in floating point.
            /// </summary>
            public static void ConstructGraphToNormalizeImage(out TFGraph graph, out TFOutput input, out TFOutput output, TFDataType destinationDataType = TFDataType.Float)
            {
                const int W = 256;
                const int H = 256;
                const float Mean = 128;
                const float Scale = 128;
                graph = new TFGraph();
                input = graph.Placeholder(TFDataType.String);
                output = graph.Cast(graph.Div(
                    x: graph.Sub(
                        x: graph.ResizeBilinear(
                            images: graph.ExpandDims(
                                input: graph.Cast(
                                    graph.DecodeJpeg(contents: input, channels: 3), DstT: TFDataType.Float),
                                dim: graph.Const(0, "make_batch")),
                            size: graph.Const(new int[] { W, H }, "size")),
                        y: graph.Const(Mean, "mean")),
                    y: graph.Const(Scale, "scale")), destinationDataType);
            }
        }

        /// <summary>
        /// Encodes a bitmap as JPEG and returns the raw encoded bytes.
        /// </summary>
        /// <param name="bitmap">Image to encode; not disposed by this method.</param>
        /// <returns>The JPEG-encoded bytes.</returns>
        public static byte[] Bitmap2Byte(Bitmap bitmap)
        {
            using (MemoryStream stream = new MemoryStream())
            {
                bitmap.Save(stream, ImageFormat.Jpeg);
                // ToArray copies the full buffer regardless of position; the
                // previous manual Seek + single Read is not guaranteed to fill
                // the array in one call in the general Stream contract.
                return stream.ToArray();
            }
        }

        // Resolves the path of the bundled segmentation model trained on
        // detection-cropped images. Despite the name, nothing is downloaded.
        private static string DownloadDefaultModel(string dir)
        {
            return Path.Combine(dir, "2120_256_64_42999_enhancment_L1loss0.03435.pb");
        }

        // Resolves the path of the fallback segmentation model (trained without
        // detection cropping), used when no tongue is detected. No download occurs.
        private static string DownloadDefaultModel_noBoxPix2Pix(string dir)
        {
            return Path.Combine(dir, "424_256_64_5999_scale300_enhancment_L1loss0.02001.pb");
        }

        /// <summary>
        /// Wraps a row-major width*height array of 8-bit intensity values in an
        /// 8bpp indexed Bitmap with a linear grayscale palette (index i renders
        /// as gray level i).
        /// </summary>
        /// <param name="rawValues">At least width*height bytes, one per pixel, row-major.</param>
        /// <param name="width">Image width in pixels.</param>
        /// <param name="height">Image height in pixels.</param>
        /// <returns>A new 8bpp grayscale bitmap owned by the caller.</returns>
        public static Bitmap ToGrayBitmap(byte[] rawValues, int width, int height)
        {
            if (rawValues == null)
                throw new ArgumentNullException("rawValues");
            if (rawValues.Length < width * height)
                throw new ArgumentException("rawValues is smaller than width * height.", "rawValues");

            Bitmap bmp = new Bitmap(width, height, PixelFormat.Format8bppIndexed);
            BitmapData bmpData = bmp.LockBits(new Rectangle(0, 0, width, height),
                ImageLockMode.WriteOnly, PixelFormat.Format8bppIndexed);
            try
            {
                // Copy row by row: the bitmap's scan lines are padded (Stride can
                // exceed width), so a single flat copy would misalign the rows.
                for (int row = 0; row < height; row++)
                {
                    Marshal.Copy(rawValues, row * width,
                        bmpData.Scan0 + row * bmpData.Stride, width);
                }
            }
            finally
            {
                bmp.UnlockBits(bmpData);  // guaranteed even if the copy throws
            }

            // 8bpp bitmaps start with a default pseudo-color palette; the getter
            // returns a mutable copy, so rewrite it to a grayscale ramp and set
            // it back (no temporary 1x1 bitmap needed).
            ColorPalette palette = bmp.Palette;
            for (int i = 0; i < 256; i++)
            {
                palette.Entries[i] = Color.FromArgb(i, i, i);
            }
            bmp.Palette = palette;

            return bmp;
        }

        private void BackgroundWorker1_DoWork(object sender, DoWorkEventArgs e)
        {
            using (MemoryStream ms = new MemoryStream())
            {
                for (int a = 0; a < path.Length; a++)
                {
                    manualReset.WaitOne();
                    ii = 0;
                    basepath = Directory.GetCurrentDirectory();
                    imageFile = System.Text.RegularExpressions.Regex.Replace(path[a], "data", "");
                    Invoke((MethodInvoker)delegate
                    {
                        label_processingFileName.Text = "Processing File: " + imageFile;
                        count = a + 1;
                        label_totalProgress.Text = "Total Progress: " + count + "/" + path.Length;
                    });

                    mat_input = Cv2.ImRead(basepath + "\\data" + imageFile, ImreadModes.Color);
                    bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_input);
                    Invoke((MethodInvoker)delegate
                    {
                        pictureBox_input.Image = bitmap_bitch;
                        pictureBox_input.Refresh();
                    });

                    label8.BackColor = Color.White;
                    label1.BackColor = Color.Red;
                    mat_drawBox = mat_input.Clone();
                    mat_cropped = new Mat(mat_input.Size(), MatType.CV_8UC3, 1);
                    byte_inputDetection = Bitmap2Byte(bitmap_bitch);

                    using (var graph = new TFGraph())
                    {
                        var model = File.ReadAllBytes(Directory.GetCurrentDirectory() + "/Detection_Normal.pb");
                        graph.Import(model, "");

                        using (var session = new TFSession(graph))
                        {
                            var tensor = ImageUtil.CreateTensorFromImageFile(byte_inputDetection, TFDataType.UInt8);

                            var runner = session.GetRunner();
                            runner

                                .AddInput(graph["image_tensor"][0], tensor)
                                .Fetch("detection_boxes", "detection_scores", "detection_classes", "num_detections");

                            var output = runner.Run();
                            var boxes = (float[,,])output[0].GetValue();
                            var scores = (float[,])output[1].GetValue();
                            var classes = (float[,])output[2].GetValue();
                            var detections = (float[])output[3].GetValue();

                            check_detection = 0;
                            max_score = 0;

                            for (int i = 0; i < scores.Length; i++)
                            {
                                if ((scores[0, i] > 0.5) && (scores[0, i] > max_score))
                                {
                                    max_score = scores[0, i];
                                    float y_min = boxes[0, i, 0] * (float)bitmap_bitch.Height;
                                    float x_min = boxes[0, i, 1] * (float)bitmap_bitch.Width;
                                    float y_max = boxes[0, i, 2] * (float)bitmap_bitch.Height;
                                    float x_max = boxes[0, i, 3] * (float)bitmap_bitch.Width;
                                    P1.X = (int)x_min;
                                    P1.Y = (int)y_min;
                                    P2.X = (int)x_max;
                                    P2.Y = (int)y_max;
                                    Cv2.Rectangle(mat_drawBox, P1, P2, new Scalar(0, 255, 0), 5);
                                    rectangle.X = (int)x_min;
                                    rectangle.Y = (int)y_min;
                                    rectangle.Width = (int)(x_max - x_min);
                                    rectangle.Height = (int)(y_max - y_min);

                                    check_detection = 1;
                                }
                            }
                        }
                    }

                    // 舌が検出されなかった場合,Detectionされた画像で学習したモデル(CropResize)を使用するのはまずいので
                    // 以前のモデル(Detectionせずに学習)を使用する
                    if (check_detection == 0)
                    {
                        MessageBox.Show("Error: Sorry can not detect any tongue in this image.\nPress [OK] to skip preprocessing.",
                            "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                        time = DateTime.Now.ToLocalTime().ToString();
                        File.AppendAllText("DetectionFailedLog.txt ", time + " " + imageFile + "\n");
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_detection.Image = null;
                            pictureBox_detection.Refresh();
                            pictureBox_cropResized.Image = null;
                            pictureBox_cropResized.Refresh();
                            pictureBox_output.Image = null;
                            pictureBox_output.Refresh();
                            pictureBox_outputSRG.Image = null;
                            pictureBox_outputSRG.Refresh();
                            pictureBox_maskSRG.Image = null;
                            pictureBox_maskSRG.Refresh();
                            pictureBox_extraction.Image = null;
                            pictureBox_extraction.Refresh();
                            pictureBox_last.Image = null;
                            pictureBox_last.Refresh();
                        });
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_input);
                        byte_inputSegmentation = Bitmap2Byte(bitmap_bitch);

                        Thread.Sleep(1000);
                        modelFile = DownloadDefaultModel_noBoxPix2Pix(basepath);
                        using (var graph = new TFGraph())
                        {
                            var model = File.ReadAllBytes(modelFile);
                            graph.Import(model, "");

                            using (var session = new TFSession(graph))
                            {
                                var tensor = ImageUtil2.CreateTensorFromImageFile(byte_inputSegmentation);

                                var runner = session.GetRunner();
                                runner
                                    .AddInput(graph["generator/input_image"][0], tensor)
                                    .Fetch(graph["generator/prediction"][0]);
                                var output = runner.Run();
                                float[,,,] resultfloat = (float[,,,])output[0].GetValue(jagged: false);

                                for (int p = 0; p < 256; p++)
                                {
                                    for (int q = 0; q < 256; q++)
                                    {
                                        float check = resultfloat[0, p, q, 0];
                                        if (check < 0)
                                        {
                                            mask[ii] = 0;
                                        }
                                        else
                                        {
                                            mask[ii] = 255;
                                        }
                                        ii++;
                                    }
                                }
                            }
                        }
                        Thread.Sleep(1000);
                        bitmap_bitch = ToGrayBitmap(mask, 256, 256);
                        mat_outputNoBox = OpenCvSharp.Extensions.BitmapConverter.ToMat(bitmap_bitch);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_outputNoBox);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_output.Image = bitmap_bitch;
                            pictureBox_output.Refresh();
                            label1.BackColor = Color.White;
                            label4.BackColor = Color.Red;
                        });

                        mat_outputNoBox.SaveImage(basepath + "\\output256" + imageFile);
                        try
                        {
                            RemoveSmallRegion(basepath + "\\output256" + imageFile, basepath + "\\output_changed1" + imageFile, 500, 1, 1);
                            RemoveSmallRegion(basepath + "\\output_changed1" + imageFile, basepath + "\\output_changed2" + imageFile, 500, 0, 0);
                        }
                        catch
                        {
                            MessageBox.Show("Error: Unable to reprocess! Please check is there [RemoveSmallRegionDLL.dll] file in floder?", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                            break;
                        }
                        mat_outputSRGNoBox = new Mat(basepath + "\\output_changed2" + imageFile, ImreadModes.GrayScale);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_outputSRGNoBox);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_outputSRG.Image = bitmap_bitch;
                            pictureBox_outputSRG.Refresh();
                            label4.BackColor = Color.White;
                            label5.BackColor = Color.Red;
                        });
                        Cv2.Resize(mat_outputSRGNoBox, mat_mask, mat_input.Size());
                        mat_mask.SaveImage(basepath + "\\mask" + imageFile);
                        try
                        {
                            RemoveSmallRegion(basepath + "\\mask" + imageFile, basepath + "\\mask_changed1" + imageFile, 500, 1, 1);
                            RemoveSmallRegion(basepath + "\\mask_changed1" + imageFile, basepath + "\\mask_changed2" + imageFile, 500, 0, 0);
                        }
                        catch
                        {
                            MessageBox.Show("Error: Unable to reprocess! Please check is there [RemoveSmallRegionDLL.dll] file in floder?", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                            break;
                        }
                        mat_maskSRG = new Mat(basepath + "\\mask_changed2" + imageFile, ImreadModes.GrayScale);
                        Cv2.Threshold(mat_maskSRG, mat_maskSRG, 128, 255, ThresholdTypes.Binary);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_maskSRG);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_maskSRG.Image = bitmap_bitch;
                            pictureBox_maskSRG.Refresh();
                            label5.BackColor = Color.White;
                            label6.BackColor = Color.Red;
                        });
                        mat_extraction = mat_input.Clone();
                        areaCount = 0;
                        for (int i = 0; i < mat_input.Height; i++)
                        {
                            for (int j = 0; j < mat_input.Width; j++)
                            {
                                Vec3b pix = mat_extraction.At<Vec3b>(i, j);
                                if (mat_maskSRG.At<int>(i, j) == 0)
                                {
                                    pix[0] = (byte)(255);
                                    pix[1] = (byte)(255);
                                    pix[2] = (byte)(255);
                                    mat_extraction.Set<Vec3b>(i, j, pix);
                                }
                                else
                                {
                                    pix[0] = (byte)(mat_extraction.At<Vec3b>(i, j).Item0);
                                    pix[1] = (byte)(mat_extraction.At<Vec3b>(i, j).Item1);
                                    pix[2] = (byte)(mat_extraction.At<Vec3b>(i, j).Item2);
                                    mat_extraction.Set<Vec3b>(i, j, pix);
                                    areaCount++;
                                }
                            }
                        }
                        mat_extraction.SaveImage(basepath + "\\extraction" + imageFile);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_extraction);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_extraction.Image = bitmap_bitch;
                            pictureBox_extraction.Refresh();
                        });
                        label6.BackColor = Color.White;



                        label7.BackColor = Color.Red;
                        mat_gloss = mat_input.Clone();
                        double sum = 0.0;
                        double sumsq = 0.0;
                        double avg = 0.0;
                        double stdev = 0.0;
                        for (int i = 0; i < mat_input.Height; i++)
                        {
                            for (int j = 0; j < mat_input.Width; j++)
                            {
                                var g_value = mat_gloss.At<Vec3b>(i, j).Item1;
                                if (mat_maskSRG.At<int>(i, j) == 0)
                                {
                                }
                                else
                                {
                                    sum += g_value;                  // G チャンネル の和
                                    sumsq += g_value * g_value;       // G チャンネル の平方和
                                }
                            }
                        }
                        avg = (double)(sum / areaCount);            // G チャンネル の平均
                        stdev = Math.Sqrt(Math.Abs((sumsq / areaCount) - (avg * avg)));  // 標準偏差
                        double thresh = avg + (stdev * 2.0);
                        int glossCount = 0;
                        for (int i = 0; i < mat_input.Height; i++)
                        {
                            for (int j = 0; j < mat_input.Width; j++)
                            {
                                if (mat_maskSRG.At<int>(i, j) == 0)
                                {
                                }
                                else
                                {
                                    Vec3b pix = mat_gloss.At<Vec3b>(i, j);
                                    if (pix.Item1 > thresh)
                                    {
                                        glossCount++;
                                        pix[0] = (byte)(255);
                                        pix[1] = (byte)(0);
                                        pix[2] = (byte)(0);
                                        mat_gloss.Set<Vec3b>(i, j, pix);
                                    }
                                }

                            }
                        }
                        mat_gloss.SaveImage(basepath + "\\gloss" + imageFile);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_gloss);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_last.Image = bitmap_bitch;
                            pictureBox_last.Refresh();
                        });
                        label7.BackColor = Color.White;


                        label8.BackColor = Color.Red;
                        //csvにboundingbox情報を保存
                        sw.WriteLine(
                                imageFile.Substring(1) + ","
                                + "None" + "," + "None" + ","
                                + "None" + "," + "None" + ","
                                + "None" + "," + "None" + "," + areaCount.ToString()
                                );

                        continue;
                    }

                    // 舌が正常にDetectionされた場合の処理
                    else
                    {
                        // 検出されたバウンディングボックス画像を保存
                        mat_drawBox.SaveImage(basepath + "\\detection" + imageFile);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_drawBox);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_detection.Image = bitmap_bitch;
                            pictureBox_detection.Refresh();
                        });
                        label1.BackColor = Color.White;

                        // 検出領域でcropし,256*256にリサイズして表示
                        label2.BackColor = Color.Red;
                        for (int i = P1.Y; i < P2.Y; i++)
                        {
                            for (int j = P1.X; j < P2.X; j++)
                            {
                                // 舌領域以外を黒へ
                                Vec3b pix = mat_input.At<Vec3b>(i, j);
                                mat_cropped.Set<Vec3b>(i, j, pix);
                            }
                        }
                        mat_cropped.SaveImage(basepath + "\\cropped" + imageFile);
                        // 検出領域の範囲を切り出す
                        OpenCvSharp.Size size_roi = new OpenCvSharp.Size();
                        size_roi.Height = rectangle.Height;
                        size_roi.Width = rectangle.Width;
                        roi = new Rect(P1, size_roi);
                        mat_roisize = mat_input.Clone(roi);
                        // セグメンテーションのため,256*256にリサイズ
                        Cv2.Resize(mat_roisize, mat_roi, mat_roi256.Size());
                        mat_roi.SaveImage(basepath + "\\cropresized" + imageFile);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_roi);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_cropResized.Image = bitmap_bitch;
                            pictureBox_cropResized.Refresh();
                        });
                        label2.BackColor = Color.White;

                        // セグメンテーションを行う
                        label3.BackColor = Color.Red;
                        byte_inputSegmentation = Bitmap2Byte(bitmap_bitch);
                        Thread.Sleep(1000);
                        modelFile = DownloadDefaultModel(basepath);
                        using (var graph = new TFGraph())
                        {
                            var model = File.ReadAllBytes(modelFile);
                            graph.Import(model, "");

                            using (var session = new TFSession(graph))
                            {
                                var tensor = ImageUtil2.CreateTensorFromImageFile(byte_inputSegmentation);
                                var runner = session.GetRunner();
                                runner
                                //.AddInput(graph["generator/input_image"][0], tensor)
                                //.Fetch(graph["generator/prediction"][0]);

                                .AddInput(graph["input_image"][0], tensor)
                                .Fetch(graph["generator1/decoder_1/Tanh"][0]);

                                var output = runner.Run();
                                float[,,,] resultfloat = (float[,,,])output[0].GetValue(jagged: false);

                                for (int p = 0; p < 256; p++)
                                {
                                    for (int q = 0; q < 256; q++)
                                    {
                                        float check = resultfloat[0, p, q, 0];
                                        if (check < 0)
                                        {
                                            mask[ii] = 0;
                                        }
                                        else
                                        {
                                            mask[ii] = 255;
                                        }
                                        ii++;
                                    }
                                }
                            }
                        }
                        GC.Collect();
                        Thread.Sleep(1000);
                        bitmap_bitch = ToGrayBitmap(mask, 256, 256);
                        mat_output = OpenCvSharp.Extensions.BitmapConverter.ToMat(bitmap_bitch);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_output);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_output.Image = bitmap_bitch;
                            pictureBox_output.Refresh();
                        });
                        label3.BackColor = Color.White;

                        // 舌分割結果の後処理
                        label4.BackColor = Color.Red;
                        // 舌分割結果を保存
                        mat_output.SaveImage(basepath + "\\output256" + imageFile);
                        // 後処理(領域拡張法)でノイズ除去
                        try
                        {
                            RemoveSmallRegion(basepath + "\\output256" + imageFile, basepath + "\\output_changed1" + imageFile, 500, 1, 1);
                            RemoveSmallRegion(basepath + "\\output_changed1" + imageFile, basepath + "\\output_changed2" + imageFile, 500, 0, 0);
                        }
                        catch
                        {
                            MessageBox.Show("Error: Unable to reprocess! Please check is there [RemoveSmallRegionDLL.dll] file in floder?", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                            break;
                        }
                        mat_outputSRG = new Mat(basepath + "\\output_changed2" + imageFile, ImreadModes.GrayScale);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_outputSRG);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_outputSRG.Image = bitmap_bitch;
                            pictureBox_outputSRG.Refresh();
                        });
                        label4.BackColor = Color.White;
                        
                        label5.BackColor = Color.Red;
                        // バウンディングボックスのサイズに舌分割結果をリサイズ
                        // この時に2値じゃなくなってるみたい
                        Cv2.Resize(mat_outputSRG, mat_outputChanged, mat_roisize.Size());
                        mat_outputChanged.SaveImage(basepath + "\\output_resized" + imageFile);
                        // 舌検出された領域において舌領域を切り出す
                        mat_mask = new Mat(mat_input.Size(), MatType.CV_8UC1, 1);
                        for (int i = P1.Y; i < P2.Y; i++)
                        {
                            for (int j = P1.X; j < P2.X; j++)
                            {
                                int pix = mat_outputChanged.At<int>(mmp, pmm);
                                mat_mask.Set<int>(i, j, pix);
                                pmm++;
                            }
                            mmp++;
                            pmm = 0;
                        }
                        mmp = 0;
                        Cv2.Resize(mat_mask, mat_mask, mat_input.Size());
                        mat_mask.SaveImage(basepath + "\\mask" + imageFile);
                        // ノイズ処理
                        try
                        {
                            RemoveSmallRegion(basepath + "\\mask" + imageFile, basepath + "\\mask_changed1" + imageFile, 500, 1, 1);
                            RemoveSmallRegion(basepath + "\\mask_changed1" + imageFile, basepath + "\\mask_changed2" + imageFile, 500, 0, 0);
                        }
                        catch
                        {
                            MessageBox.Show("Error: Unable to reprocess! Please check is there [RemoveSmallRegionDLL.dll] file in floder?", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                            break;
                        }

                        mat_maskSRG = new Mat(basepath + "\\mask_changed2" + imageFile, ImreadModes.GrayScale);
                        Cv2.Threshold(mat_maskSRG, mat_maskSRG, 128, 255, ThresholdTypes.Binary);
                        // 2値マスクの最終結果
                        mat_maskSRG.SaveImage(basepath + "\\mask_final" + imageFile);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_maskSRG);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_maskSRG.Image = bitmap_bitch;
                            pictureBox_maskSRG.Refresh();
                        });
                        label5.BackColor = Color.White;

                        // 元画像とマスクを合わせて,舌領域を抜き出す
                        label6.BackColor = Color.Red;
                        mat_extraction = mat_input.Clone();
                        areaCount = 0;
                        for (int i = 0; i < mat_input.Height; i++)
                        {
                            for (int j = 0; j < mat_input.Width; j++)
                            {
                                Vec3b pix = mat_extraction.At<Vec3b>(i, j);
                                if (mat_maskSRG.At<int>(i, j) == 0)
                                {
                                    pix[0] = (byte)(255);
                                    pix[1] = (byte)(255);
                                    pix[2] = (byte)(255);
                                    mat_extraction.Set<Vec3b>(i, j, pix);
                                }
                                else
                                {
                                    pix[0] = (byte)(mat_extraction.At<Vec3b>(i, j).Item0);
                                    pix[1] = (byte)(mat_extraction.At<Vec3b>(i, j).Item1);
                                    pix[2] = (byte)(mat_extraction.At<Vec3b>(i, j).Item2);
                                    mat_extraction.Set<Vec3b>(i, j, pix);
                                    areaCount++;
                                }
                            }
                        }
                        mat_extraction.SaveImage(basepath + "\\extraction" + imageFile);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_extraction);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_extraction.Image = bitmap_bitch;
                            pictureBox_extraction.Refresh();
                        });

                        label6.BackColor = Color.White;

                        // Gloss抽出処理 
                        // extraction結果は使用しない(255,255,255の扱いが煩雑なため)
                        label7.BackColor = Color.Red;
                        mat_gloss = mat_input.Clone();
                        double sum = 0.0;
                        double sumsq = 0.0;
                        double avg = 0.0;
                        double stdev = 0.0;
                        // 2010石川さんの手法にならい,Gchannelのavgとstdを計算
                        for (int i = 0; i < mat_input.Height; i++)
                        {
                            for (int j = 0; j < mat_input.Width; j++)
                            {
                                var g_value = mat_gloss.At<Vec3b>(i, j).Item1;
                                if (mat_maskSRG.At<int>(i, j) == 0)
                                {
                                }
                                else
                                {
                                    sum += g_value;                  // G チャンネル の和
                                    sumsq += g_value * g_value;       // G チャンネル の平方和
                                }
                            }
                        }
                        avg = (double)(sum / areaCount);            // G チャンネル の平均
                        stdev = Math.Sqrt(Math.Abs((sumsq / areaCount) - (avg * avg)));  // 標準偏差

                        // 閾値を決定し,glossをカウント
                        double thresh = avg + (stdev * 2.0);
                        int glossCount = 0;
                        for (int i = 0; i < mat_input.Height; i++)
                        {
                            for (int j = 0; j < mat_input.Width; j++)
                            {
                                Vec3b pix = mat_gloss.At<Vec3b>(i, j);
                                if (mat_maskSRG.At<int>(i, j) == 0)
                                {
                                    pix[0] = (byte)((pix[0] + pix[1] + pix[2]) / 3);
                                    pix[1] = (byte)pix[0];
                                    pix[2] = (byte)pix[0];
                                    mat_gloss.Set<Vec3b>(i, j, pix);
                                }
                                else
                                {
                                    if (pix.Item1 > thresh)
                                    {
                                        glossCount++;
                                        pix[0] = (byte)(255);
                                        pix[1] = (byte)(0);
                                        pix[2] = (byte)(255);
                                        mat_gloss.Set<Vec3b>(i, j, pix);
                                    }
                                    else
                                    {
                                        pix[0] = (byte)((pix[0] + pix[1] + pix[2]) / 3);
                                        pix[1] = (byte)pix[0];
                                        pix[2] = (byte)pix[0];
                                        mat_gloss.Set<Vec3b>(i, j, pix);
                                    }
                                }
                                
                            }
                        }
                        mat_gloss.SaveImage(basepath + "\\gloss" + imageFile);
                        bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_gloss);
                        Invoke((MethodInvoker)delegate
                        {
                            pictureBox_last.Image = bitmap_bitch;
                            pictureBox_last.Refresh();
                        });
                        label7.BackColor = Color.White;
                        
                        // 処理が終わった画像を記録する
                        label8.BackColor = Color.Red;
                        time = DateTime.Now.ToLocalTime().ToString();
                        File.AppendAllText("Log.txt ", time + " " + imageFile + " Done!\n");


                        //csvにboundingbox情報を保存
                        sw.WriteLine(
                                imageFile.Substring(1) + ","
                                + P1.X.ToString() + "," + P1.Y.ToString() + ","
                                + P2.X.ToString() + "," + P2.Y.ToString() + ","
                                + Math.Abs(P1.X - P2.X).ToString() + "," + Math.Abs(P1.Y - P2.Y).ToString() + ","
                                + areaCount.ToString() + "," + glossCount.ToString()
                                );
                    }
                    GC.Collect(); 
                }
                MessageBox.Show("Finished!");

                Invoke((MethodInvoker)delegate
                {
                    button_start.Enabled = true;
                    button_pause.Enabled = false;
                    label_processingFileName.Text = "Processing File: None";
                });
            }
        }

        private void Button_pause_Click(object sender, EventArgs e)
        {
            // Toggles the worker thread's pause gate. The button caption doubles
            // as the state flag: "Pause" means the worker is currently running.
            bool running = button_pause.Text == "Pause";
            if (running)
            {
                // Halt the worker at its next wait on the gate.
                manualReset.Reset();
            }
            else
            {
                // Release the worker so processing continues.
                manualReset.Set();
            }
            button_pause.Text = running ? "Continue" : "Pause";
        }

        private void Form1_FormClosing(object sender, FormClosingEventArgs e)
        {
            // Flush and release the CSV log writer (sw — presumably opened when a
            // processing run starts; declared outside this view) on shutdown.
            Console.WriteLine("file closing");
            // Null-conditional guard: if the form is closed before a run ever
            // started, sw is still null and a bare sw.Close() would throw
            // NullReferenceException during the close sequence.
            sw?.Close();
            Console.WriteLine("file closed");
        }

        private void button_mode_Click(object sender, EventArgs e)
        {
            // Intentionally empty: auto-generated Click handler with no behavior
            // wired up yet. NOTE(review): likely still referenced from
            // Form1.Designer.cs — confirm before deleting this stub.
        }
    }
}